Introduction

In this document, we outline the Bayesian analogs of the statistical analyses described in lecture 1 (GitHub code).

Load packages

library(dplyr)
library(tibble)
library(purrr)
library(tidyr)
library(forcats)
library(gtools)
library(patchwork)
library(broom)
library(broom.mixed)
library(modelr)
library(brms)
library(tidybayes)
library(ggdist)
library(bayesplot)
library(ggplot2)
library(knitr)
# use the cmdstanr backend for brms if it is installed, otherwise fall back to rstan
BRM_BACKEND <- ifelse(require("cmdstanr"), 'cmdstanr', 'rstan')

Dataset

The following dataset is from experiment 2 of “How Relevant are Incidental Power Poses for HCI?” (Jansen & Hornbæk, 2018). Study participants were asked to adopt either an expansive posture or a constrictive posture before performing a task. The experiment investigated whether posture has an effect on risk-taking behavior.

First, we load the data.

pose_df = readr::read_csv("data/poses_data.csv", show_col_types = FALSE) %>%
  mutate(condition = factor(condition)) %>%
  group_by(participant)
head(pose_df %>% select(participant, condition, change))
participant condition change
1 expansive 3.362832
2 constrictive 29.147982
3 expansive 25.409836
4 constrictive 54.069767
5 expansive -36.644592
6 constrictive 29.756098

The data has been aggregated for each participant:

- condition = expansive indicates an expansive posture; condition = constrictive indicates a constrictive posture
- The dependent variable is change, which gives the percentage change in risk-taking behavior; it is a continuous variable

For the purposes of this demo, we are only concerned with these two variables. We can ignore the other variables for now.

Intuition of Bayesian Statistics

The Bayesian t-test (BEST) assumes that the data in the two conditions arise from two separate t-distributions. In the following section, we illustrate this updating process for one of the conditions in the experiment.
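
Bayesian inference combines a prior distribution over the parameters with the likelihood of the observed data; the posterior is proportional to their product:

\[ p(\mu \mid y) \propto p(y \mid \mu) \, p(\mu) \]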

We will use \(\mathrm{Normal}(\mu = 20, \sigma = 20)\) as the prior distribution for the mean.

First, we define some functions for manual calculation of the posterior normal distribution:

# posterior sd of the mean, given the prior sd, the (known) data sd, and n observations
sigma_post = function(sigma_prior, sigma, n = 1) {
  sqrt(1 / (1 / (sigma_prior^2) + n / (sigma^2)))
}
# posterior mean: a precision-weighted average of the prior mean and the data mean
mu_post = function(mu_prior, sigma_prior, mu, sigma, n = 1) {
  tau = sigma_post(sigma_prior, sigma, n)
  (tau^2 / sigma_prior^2) * mu_prior + (n * tau^2 / sigma^2) * mu
}
d.p2 = tibble(
  group = c("prior", "expansive", "constrictive", "posterior"), 
  mu = c(20, 32.82, 31.61, mu_post(20, 20, 32.82, 7.52)), 
  sd = c(20, 7.52, 7.06, sigma_post(20, 7.52))
) %>%
  mutate(
    cutoff_group = list(c(1:7)),
    cutoff = list(c(0, 15, 25, 30, 32.82, 40, 100))
  ) %>%
  unnest(c(cutoff_group, cutoff)) %>%
  mutate(
    cutoff = if_else(group == 'prior', 100, cutoff),
    x = map(cutoff, ~ seq(from = -40, ., by = 0.1)),
    y = pmap(list(x, mu, sd), ~ dnorm(..1, ..2, ..3))
  )

In the plot below, we show the raw data distribution for the two conditions:

p1 = pose_df %>%
  ggplot() +
  geom_point(aes(x = change, y = condition, colour = condition), 
             position = position_jitter(height = 0.1), alpha = 0.7) +
  scale_color_theme() + 
  labs(y = "Condition") +
  theme(
    legend.position = "none", 
    axis.line.y = element_blank(), 
    axis.ticks.y = element_blank(),
    axis.title.y = element_blank()
  ) +
  scale_x_continuous(limits = c(-150, 250), breaks = seq(-150, 250, by = 50))

p2.blank = tibble(y = c("expansive", "constrictive"), x = 0) %>%
  ggplot(aes(x, y)) +
  scale_color_theme() + 
  theme_density

cowplot::plot_grid(p2.blank, p1, nrow = 2)

First, we define a function to help us plot different cutoff groups.

plot_preliminary <- function(data, group_num, group_name){
  data %>%
    filter(cutoff_group == group_num & group %in% group_name) %>%
    unnest(c(x, y)) %>%
    ggplot(aes(x, y)) +
    # filled density curves
    geom_area(aes(fill = group, color = group), position = "identity", linewidth = 1, alpha = 0.3) +
    scale_x_continuous(limits = c(-40, 100)) +
    scale_y_continuous(limits = c(0, 0.1)) +
    scale_color_theme() +
    theme_density
}
cowplot::plot_grid(
  plot_preliminary(d.p2, 0, NULL), 
  p1, nrow = 2)

Next, we plot the prior density:

cowplot::plot_grid(
  plot_preliminary(d.p2, 1, 'prior'), 
  p1, nrow = 2)

Then we describe, step by step, how the likelihood is computed:

cowplot::plot_grid(
  plot_preliminary(d.p2, 1, 'expansive'), 
  p1, nrow = 2)

cowplot::plot_grid(
  plot_preliminary(d.p2, 2, 'expansive'), 
  p1, nrow = 2)

cowplot::plot_grid(
  plot_preliminary(d.p2, 3, 'expansive'), 
  p1, nrow = 2)

cowplot::plot_grid(
  plot_preliminary(d.p2, 5, 'expansive'), 
  p1, nrow = 2)

cowplot::plot_grid(
  plot_preliminary(d.p2, 6, 'expansive'), 
  p1, nrow = 2)

cowplot::plot_grid(
  plot_preliminary(d.p2, 7, 'expansive'), 
  p1, nrow = 2)

We want to compute the posterior, which is proportional to the product of the prior and the likelihood:

cowplot::plot_grid(
  plot_preliminary(d.p2, 7, c('prior', 'expansive', 'posterior')), 
  p1, nrow = 2)

If you have gganimate and magick working on your computer, you can also render this updating process as an animated gif; we do not show that code here.


Model 1: Equal standard deviations

Understanding the data

We plot the data distribution, the empirical density curve (blue line), and a theoretical density curve (dashed black line).

pose_df %>%
  mutate(c = as.factor(condition)) %>%
  ggplot(aes(x = change)) +
  # data distribution
  geom_histogram(
    aes(y = ..density..),
    binwidth = 10,
    fill = theme_yellow,
    alpha = .75,
    color = 'white'
  ) +
  # empirical density curve
  geom_density(size = 1,
               adjust = 1.5,
               color = theme_blue) +
  # theoretical density curve: a t-distribution with mu = 16, sigma = 39, and nu = 6
  geom_function(
    color = "#222222",
    linetype = 'dashed',
    fun = function(x)
      dstudent_t(x, mu = 16, sigma = 39, df = 6),
    size = 1
  ) + 
  scale_x_continuous(limits = c(-200, 200)) +
  theme(
    axis.line.y = element_blank(),
    axis.text.y = element_blank(),
    axis.ticks.y = element_blank(),
    axis.title.y = element_blank()
  )

Step 1: model specification

Here is the model written as a mathematical expression.

\[ \begin{align} y_{i} &\sim \mathrm{Student\_t}(\nu_{0}, \mu, \sigma_{0}) \\ \mu &= \beta_{0} + \beta_{1} * x_i \\ \sigma_{0} &\sim \mathrm{HalfNormal}(0, 10) \\ \beta_{0} &\sim \mathrm{?} \\ \beta_{1} &\sim \mathrm{Normal}(0,2) \\ \nu_{0} &\sim \mathrm{?} \\ i & \in \{\mathrm{expansive}, \mathrm{constrictive}\} \end{align} \]

We translate this model into a brms formula using the function bf().

model.1.formula <- bf(
                      # we think change is affected by different conditions.
                      change ~ condition,
                      # to tell brms which response distribution to use
                      family = student()
                    )

Now we can use get_prior() to inspect the formula and the parameters that need priors. brms reports a default prior for each parameter class:

tibble(get_prior(model.1.formula, pose_df))
prior                     class      coef                group  resp  dpar  nlpar  lb  ub  source
                          b                                                                default
                          b          conditionexpansive                                    default
student_t(3, 22.6, 38.8)  Intercept                                                        default
gamma(2, 0.1)             nu                                                        1      default
student_t(3, 0, 38.8)     sigma                                                     0      default

prior checks

We look at the default priors from brms.

cowplot::plot_grid(
  tibble(x = qstudent_t(ppoints(n = 500), df = 3, mu = 22.6, sigma = 38.8)) %>% 
    ggplot() +
    geom_density(aes(x = x), fill = theme_yellow, color = NA) +
    ggtitle('student_t(3, 22.6, 38.8) for Intercept') +
    coord_cartesian(xlim = c(-300, 300), expand = FALSE),
  tibble(x = qgamma(ppoints(n = 500), shape = 2, rate = .1)) %>% 
    ggplot() +
    geom_density(aes(x = x), fill = theme_yellow, color = NA) +
    ggtitle('gamma(2, 0.1) for nu') +
    coord_cartesian(xlim = c(1, 100), expand = FALSE),
  tibble(x = qstudent_t(ppoints(n = 500), df = 3, mu = 0, sigma = 38.8)) %>% 
    ggplot() +
    geom_density(aes(x = x), fill = theme_yellow, color = NA) +
    ggtitle('student_t(3, 0, 38.8) for sigma') +
    coord_cartesian(xlim = c(0, 400), expand = FALSE),
  ncol = 3)

We do a prior check for default priors to see the range of prior predictions.

model.1.checks_default <- brm(
  model.1.formula,
  data = pose_df, 
  family = student(),
  prior = c(
    # brms's default prior on class b is flat (improper), which cannot be
    # sampled from, so we set a proper prior here
    prior(normal(0, 3), class = 'b')
  ),
  # draw from the prior distributions only, ignoring the data
  sample_prior = 'only',
  backend = BRM_BACKEND,
  # save the fitted model to disk, refitting only if the model changes
  file = 'rds/model.1.checks_default.rds',
  file_refit = 'on_change' 
)
## Running MCMC with 4 sequential chains...
## 
## (per-iteration warmup and sampling progress omitted)
## 
## Chain 1 finished in 0.0 seconds.
## Chain 2 finished in 0.0 seconds.
## Chain 3 finished in 0.0 seconds.
## Chain 4 finished in 0.0 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 0.0 seconds.
## Total execution time: 0.7 seconds.

We draw from the prior predictive distribution using predicted_draws().

model.1.defaultpriorsamples <- 
  model.1.checks_default %>% 
    predicted_draws(tibble(condition = c('expansive', 'constrictive')))

Take a look at the prior predictive draws. You can ignore .row, .chain, and .iteration. The .draw column is the ID for each draw; .prediction is the value we care about.

head(model.1.defaultpriorsamples)
condition .row .chain .iteration .draw .prediction
expansive 1 NA NA 1 20.24646
expansive 1 NA NA 2 66.18096
expansive 1 NA NA 3 53.88548
expansive 1 NA NA 4 47.42815
expansive 1 NA NA 5 18.04056
expansive 1 NA NA 6 11.17446

We plot the prediction draws. They look reasonable but span a wide range, as we would expect given the ranges of the priors.

model.1.defaultpriorsamples %>% 
  ggplot(aes(x = .prediction, group = condition)) +
  geom_density(alpha = .5, color = NA, adjust = 2, fill = theme_yellow) +
  theme_density_x + 
  ggtitle('Checks default priors of model.1')

We instead choose narrower priors.

cowplot::plot_grid(
  tibble(x = qstudent_t(ppoints(n = 1000), mu = 22.6, sigma = 10, df = 3)) %>% 
    ggplot() +
    geom_density(aes(x = x), fill = theme_yellow, color = NA) +
    ggtitle('student_t(3, 22.6, 10) for Intercept') +
    coord_cartesian(xlim = c(-30, 80), expand = FALSE),
  tibble(x = qnorm(ppoints(n = 1000), mean = 0, sd = 2)) %>% 
    ggplot() +
    geom_density(aes(x = x), fill = theme_yellow, color = NA) +
    ggtitle('Normal(0, 2) for b') +
    coord_cartesian(xlim = c(-8, 8), expand = FALSE),
  # |Z| with Z ~ Normal(0, 10) is HalfNormal(0, 10)
  tibble(x = abs(qnorm(ppoints(n = 1000), mean = 0, sd = 10))) %>% 
    ggplot() +
    geom_density(aes(x = x), fill = theme_yellow, color = NA) +
    ggtitle('HalfNormal(0, 10) for sigma') +
    coord_cartesian(xlim = c(0, 50), expand = FALSE),
  ncol = 3)

We rerun the prior predictive check with these priors.

model.1.checks <- brm(
  model.1.formula,
  data = pose_df, 
  family = student(),
  prior = c(
    prior(student_t(3, 22.6, 10), class = "Intercept"),
    prior(normal(0, 2), class = 'b'),
    prior(normal(0, 10), class = 'sigma', lb = 0)
  ),
  sample_prior = 'only',
  backend = BRM_BACKEND,
  file = 'rds/model.1.checks.rds',
  file_refit = 'on_change' 
)

We draw from the new prior distribution.

model.1.priorsamples <- 
  model.1.checks %>% 
    predicted_draws(tibble(condition = c('expansive', 'constrictive')))

We compare the two sets of prior predictive distributions. Visually, our priors produce a narrower range.

cowplot::plot_grid(
  
model.1.defaultpriorsamples %>% 
  ggplot(aes(x = .prediction, group = condition)) +
  geom_density(alpha = .5, color = NA, adjust = 2, fill = theme_yellow) +
  theme_density_x + 
  # coord_cartesian(xlim = c(-800, 1000)) +
  ggtitle('Checks for default priors of model.1')
,
model.1.priorsamples %>% 
  ggplot(aes(x = .prediction, group = condition)) +
  geom_density(alpha = .5, color = NA, adjust = 2, fill = theme_yellow) +
  theme_density_x + 
  # coord_cartesian(xlim = c(-800, 1000)) +
  ggtitle('Checks for our priors of model.1')
,
ncol = 1)

Step 2: model fitting

Now we can fit the model. This model takes only a few seconds to fit.

model.1 <- brm(
  model.1.formula,
  data = pose_df, 
  family = student(),
  prior = c(
    prior(normal(0, 2), class = 'b'),
    prior(normal(0, 10), class = 'sigma', lb = 0)
  ),
  backend = BRM_BACKEND,
  file = 'rds/model.1.rds'
)

Step 3: check posteriors

aspect 1: mcmc traces

First, we check the MCMC traces to ensure that the chains are mixed well.

color_scheme_set("teal")
mcmc_trace(model.1, facet_args = list(ncol = 4))

plot(model.1)

aspect 2: model metrics

We also check Rhat and ESS. We want Rhat to be close to 1 to ensure model convergence. We want the Bulk ESS (effective sample size) to be at least a few hundred (ideally at least a thousand) to ensure a reliable estimate of the mean, and the Tail ESS to be at a similar level. If the ESS values are too low, the posterior draws are too autocorrelated; you need to increase the number of iterations and perhaps the number of chains.
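
If the ESS values were too low here, we could draw more samples without rewriting the model, for example with brms's update() (a sketch; the argument values and the name model.1.long are illustrative):

# refit with more post-warmup iterations; update() reuses the compiled model
model.1.long <- update(model.1, iter = 4000, warmup = 1000, chains = 4)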

summary(model.1)
##  Family: student 
##   Links: mu = identity; sigma = identity; nu = identity 
## Formula: change ~ condition 
##    Data: pose_df (Number of observations: 80) 
##   Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
##          total post-warmup draws = 4000
## 
## Population-Level Effects: 
##                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept             22.84      4.49    14.50    31.77 1.00     2686     2092
## conditionexpansive    -0.26      1.97    -4.10     3.62 1.00     2726     2987
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma    28.35      4.13    20.76    36.92 1.00     2180     2520
## nu        3.68      2.35     1.57     9.04 1.00     2172     2158
## 
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

aspect 3: visually

We can compare the observed data (y) against draws from the posterior predictive distribution (yrep); this is a posterior predictive check.

pose_df.bayesiant.y <- pose_df$change

pose_df.bayesiant.yrep <- posterior_predict(model.1, ndraws = 30, seed = 1234)

ppc_dens_overlay(pose_df.bayesiant.y, pose_df.bayesiant.yrep, linewidth = 2)

Now we check the posterior prediction of this model to ensure that it generates reasonable predictions.

model.1.predictions <- 
  model.1 %>% 
    predicted_draws(tibble(condition = c('expansive', 'constrictive')))
head(model.1.predictions)
condition .row .chain .iteration .draw .prediction
expansive 1 NA NA 1 72.1147799
expansive 1 NA NA 2 53.4216750
expansive 1 NA NA 3 0.6535984
expansive 1 NA NA 4 -9.8268421
expansive 1 NA NA 5 36.9552738
expansive 1 NA NA 6 23.2065242
plot_predictions <- function(model, df = NULL, title = ''){
  
  if(is.null(df))
      df = tibble(condition = c('expansive', 'constrictive'), 
                           participant = c(-1, -1))
  model %>% 
    predicted_draws(df,
                    seed = 1234,
                    ndraws = NULL,
                    allow_new_levels = TRUE,
                    sample_new_levels = 'uncertainty') %>% 
    ggplot(aes(x = .prediction, colour = condition)) +
    # fill = NA so only the outlines are drawn
    geom_density(alpha = .5, size = 1, adjust = 2, fill = NA) +
    scale_color_theme() +
    theme_density_x + 
    scale_y_continuous(breaks = 0, labels = 'constrictive') + 
    scale_x_continuous(breaks = seq(-150, 250, by = 50)) +
    coord_cartesian(xlim = c(-150, 250)) + 
    ggtitle(paste0('Posterior predictions of ', deparse(substitute(model))))
}
cowplot::plot_grid(plot_predictions(model.1), 
                   p1 + 
                    scale_x_continuous(breaks = seq(-150, 250, by = 100)) +
                    coord_cartesian(xlim = c(-150, 250)), 
                   nrow = 2)

We now generate the posterior predictions of the means, which are of interest here.

model.1.posteriors <- 
  model.1 %>% 
    epred_draws(tibble(condition = c('expansive', 'constrictive')))
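
Aside: tidybayes offers two related functions, and we use both in this document. predicted_draws() wraps posterior_predict() and returns draws of new observations, including observation-level noise; epred_draws() wraps posterior_epred() and returns draws of the expected value (the mean of the response distribution). A minimal sketch of the two calls:

# draws of hypothetical new observations (includes the Student-t noise)
model.1 %>% predicted_draws(tibble(condition = c('expansive', 'constrictive')))
# draws of the conditional mean only (no observation noise)
model.1 %>% epred_draws(tibble(condition = c('expansive', 'constrictive')))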


plot_posteriors <- function(model, df = NULL, title = ''){
  
  if(is.null(df))
     df = tibble(condition = c('expansive', 'constrictive'))
  
  model %>% 
    epred_draws(df, 
# ignore random effects, if there are any
                #seed = 1234,
                ndraws = NULL,
                re_formula = NA) %>% 
    ggplot(aes(x = .epred, fill = condition)) +
    geom_density(alpha = .5, size = 1, adjust = 2, color = NA) +
    scale_color_theme() +
    theme_density_x + 
    scale_y_continuous(breaks = 0, labels = 'constrictive') + 
    scale_x_continuous(limits = c(-150, 250), breaks = seq(-150, 250, by = 50)) +
    ggtitle(paste0('Posterior means of ', deparse(substitute(model))))
}

cowplot::plot_grid(plot_posteriors(model.1), p1, nrow = 2)

Model 2: the BEST test model

Step 1: model specification

This model is the BEST test model as described by Kruschke in the paper Bayesian estimation supersedes the t-test. In this model, \(\beta_1\) indicates the mean difference in the outcome variable between the two groups (in this case, the percent change in the BART scores). We place priors on \(\beta_1\) and the other parameters and combine them with the likelihood to obtain our posterior estimates.

\[ \begin{align} y_{i} &\sim \mathrm{T}(\nu, \mu, \sigma) \\ \mu &= \beta_{0} + \beta_{1} * x_i \\ \log(\sigma) &= \sigma_{a} + \sigma_{b}*x_i \\ \beta_{1} &\sim \mathrm{Normal}(\mu_{0}, \sigma_{0}) \\ \sigma_a, \sigma_b &\sim \mathrm{Cauchy}(0, 2) \\ \nu &\sim \mathrm{Exponential}(1/30)\\ i & \in \{\mathrm{expansive}, \mathrm{constrictive}\} \end{align} \]

Note that brms applies a log link to \(\sigma\) when it has its own linear predictor, so the second linear equation is on the log scale (see the Links line in the model summary below).

model.2.formula <- bf(# we think change is affected by different conditions.
                      change ~ condition,
                      sigma ~ condition,
                      # to tell brms which response distribution to use
                      family = student())
tibble(get_prior(model.2.formula, pose_df))
prior                     class      coef                group  resp  dpar   nlpar  lb  ub  source
                          b                                                                 default
                          b          conditionexpansive                                     default
student_t(3, 22.6, 38.8)  Intercept                                                         default
gamma(2, 0.1)             nu                                                         1      default
                          b                                           sigma                 default
                          b          conditionexpansive               sigma                 default
student_t(3, 0, 2.5)      Intercept                                   sigma                 default

prior check

R's qcauchy() uses the same location/scale parameterization as Stan's cauchy(mu, sigma), so we can preview the prior directly.

cowplot::plot_grid(
  tibble(x = qnorm(ppoints(n = 1000), mean = 0, sd = 2)) %>% 
    ggplot() +
    geom_density(aes(x = x), fill = theme_yellow, color = NA) +
    ggtitle('Normal(0, 2) for b') +
    coord_cartesian(expand = FALSE),
  tibble(x = qexp(ppoints(n = 1000), rate = 0.0333)) %>% 
    ggplot() +
    geom_density(aes(x = x), fill = theme_yellow, color = NA) +
    ggtitle('exponential(0.0333) for nu') +
    coord_cartesian(expand = FALSE),
  tibble(x = qcauchy(ppoints(n = 1000), location = 0, scale = 2)) %>% 
    ggplot() +
    geom_density(aes(x = x), fill = theme_yellow, color = NA) +
    ggtitle('cauchy(0, 2) for sigma') +
    coord_cartesian(expand = FALSE),
  ncol = 3)

model.2.priorchecks <- brm(
  model.2.formula,
  data = pose_df, 
  family = student(),
  prior = c(
    prior(normal(0, 2), class = 'b'),
    prior(cauchy(0, 2), class = 'b', dpar = 'sigma'),
    prior(exponential(0.0333), class = 'nu')
  ),
  sample_prior = 'only',
  backend = BRM_BACKEND,
  file = 'rds/model.2.priorchecks.rds',
  file_refit = 'on_change' 
)
## Running MCMC with 4 sequential chains...
## 
## (per-iteration warmup and sampling progress omitted)
## 
## Chain 1 finished in 0.1 seconds.
## Chain 2 finished in 0.1 seconds.
## Chain 3 finished in 0.1 seconds.
## Chain 4 finished in 1.6 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 0.5 seconds.
## Total execution time: 2.1 seconds.
model.2.priorchecks %>% 
 epred_draws(tibble(condition = c('expansive', 'constrictive'))) %>% 
  ggplot(aes(x = .epred, group = condition)) +
  geom_density(alpha = .5, color = NA, adjust = 2, fill = theme_yellow) +
  theme_density_x + 
  ggtitle('Checks priors of model.2')

Step 2: model fitting

model.2 <- brm(
  model.2.formula,
  data = pose_df, 
  family = student(),
  prior = c(
    prior(normal(0, 2), class = 'b'),
    prior(cauchy(0, 2), class = 'b', dpar = 'sigma'),
    prior(exponential(0.0333), class = 'nu')
  ),
  backend = BRM_BACKEND,
  file = 'rds/model.2.rds',
  file_refit = 'on_change' 
)
## Running MCMC with 4 sequential chains...
## 
## (per-iteration warmup and sampling progress omitted)
## 
## Chain 1 finished in 0.3 seconds.
## Chain 2 finished in 0.3 seconds.
## Chain 3 finished in 0.2 seconds.
## Chain 4 finished in 0.3 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 0.3 seconds.
## Total execution time: 1.3 seconds.

Step 3: posterior checks

summary(model.2)
##  Family: student 
##   Links: mu = identity; sigma = log; nu = identity 
## Formula: change ~ condition 
##          sigma ~ condition
##    Data: pose_df (Number of observations: 80) 
##   Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
##          total post-warmup draws = 4000
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   24.34      4.78    15.59    34.22 1.00     2438
## sigma_Intercept              3.39      0.20     2.98     3.78 1.00     2351
## conditionexpansive          -0.11      1.98    -3.84     3.86 1.00     3651
## sigma_conditionexpansive     0.15      0.22    -0.27     0.60 1.00     3208
##                          Tail_ESS
## Intercept                    2247
## sigma_Intercept              2368
## conditionexpansive           2772
## sigma_conditionexpansive     2187
## 
## Family Specific Parameters: 
##    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## nu     5.47      6.44     1.67    19.56 1.00     1864     1893
## 
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
wrap_plots(
  plot_predictions(model.2) + 
    scale_x_continuous(limits = c(-200, 250), expand = c(0, 0), breaks = seq(-150, 250, by = 50)),
  p1 + 
    scale_x_continuous(limits = c(-200, 250), expand = c(0, 0), breaks = seq(-150, 250, by = 50)),
  nrow = 2)

wrap_plots(
  plot_posteriors(model.2), 
  p1,
nrow = 2)

model.2.posteriors <- model.2 %>% 
                epred_draws(tibble(condition = c('expansive', 'constrictive')), 
                #seed = 1234,
                ndraws = NULL,
                re_formula = NA) 
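
Because the coefficient on condition is exactly the mean difference between the two conditions in this model, we can also read it off the posterior draws directly; a sketch using tidybayes (brms names this coefficient b_conditionexpansive):

model.2 %>% 
  spread_draws(b_conditionexpansive) %>% 
  median_qi(b_conditionexpansive)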

Model 3: the BEST model with varying intercepts

Step 1: model specification

\[ \begin{align} y_{i,j} &\sim \mathrm{T}(\nu, \mu, \sigma) \\ \mu &= \beta_{0} + \beta_{0,j} + \beta_{1} * x_i \\ \log(\sigma) &= \sigma_{a} + \sigma_{b}*x_i \\ \beta_{1} &\sim \mathrm{Normal}(0, 2) \\ \sigma_a, \sigma_b &\sim \mathrm{HalfNormal}(0, 10) \\ \nu &\sim \mathrm{Exponential}(1/30)\\ i & \in \{\mathrm{expansive}, \mathrm{constrictive}\}\\ j & \in \{1, ..., \mathrm{N}\} \end{align} \]

model.3.formula <- bf(# we think change is affected by different conditions.
                      change ~ condition + (1|participant),
                      sigma ~ condition,
                      # to tell brms which response distribution to use
                      family = student())
tibble(get_prior(model.3.formula, pose_df))
prior                     class      coef                group        resp  dpar   nlpar  lb  ub  source
                          b                                                                       default
                          b          conditionexpansive                                           default
student_t(3, 22.6, 38.8)  Intercept                                                               default
gamma(2, 0.1)             nu                                                              1       default
student_t(3, 0, 38.8)     sd                                                              0       default
                          sd                             participant                              default
                          sd         Intercept           participant                              default
                          b                                                 sigma                 default
                          b          conditionexpansive                     sigma                 default
student_t(3, 0, 2.5)      Intercept                                         sigma                 default

Step 2: model fitting

model.3 <- brm(
  model.3.formula,
  data = pose_df, 
  family = student(),
  prior = c(
    prior(normal(0, 2), class = 'b'),
    prior(normal(0, 2), class = 'b', dpar = 'sigma'),
    prior(normal(0, 2), class = 'sd', group = 'participant', lb = 0),
    prior(exponential(0.0333), class = 'nu')
  ),
  #backend = BRM_BACKEND,
  file = 'rds/model.3.rds',
  file_refit = 'on_change' 
)
## 
## SAMPLING FOR MODEL 'ba38676c9d7132d64327a1aa146857f5' (CHAINS 1-4).
## (gradient evaluation and per-iteration progress omitted)
## 
## Chain 1:  Elapsed Time: 0.785923 seconds (Total)
## Chain 2:  Elapsed Time: 0.748975 seconds (Total)
## Chain 3:  Elapsed Time: 0.784279 seconds (Total)
## Chain 4:  Elapsed Time: 0.784776 seconds (Total)

Step 3: posterior checks

summary(model.3)
##  Family: student 
##   Links: mu = identity; sigma = log; nu = identity 
## Formula: change ~ condition + (1 | participant) 
##          sigma ~ condition
##    Data: pose_df (Number of observations: 80) 
##   Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
##          total post-warmup draws = 4000
## 
## Group-Level Effects: 
## ~participant (Number of levels: 80) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     1.60      1.19     0.07     4.38 1.00     3082     1868
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   24.52      4.83    15.63    34.58 1.00     5484
## sigma_Intercept              3.40      0.20     3.02     3.79 1.00     5679
## conditionexpansive          -0.08      1.96    -3.94     3.70 1.00     7590
## sigma_conditionexpansive     0.15      0.21    -0.28     0.58 1.00     9517
##                          Tail_ESS
## Intercept                    2936
## sigma_Intercept              3322
## conditionexpansive           2232
## sigma_conditionexpansive     3065
## 
## Family Specific Parameters: 
##    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## nu     5.76      6.78     1.73    23.42 1.00     5117     2431
## 
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
cowplot::plot_grid(plot_predictions(model.3), p1, nrow = 2)

cowplot::plot_grid(plot_posteriors(model.3), p1, nrow = 2)
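
If you want to inspect the participant-level intercept offsets themselves, you can extract the group-level effects with tidybayes; a sketch (r_participant[participant, term] is the tidybayes index syntax for brms group-level terms):

model.3 %>% 
  spread_draws(r_participant[participant, term]) %>% 
  group_by(participant, term) %>% 
  median_qi(r_participant)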

model.3.posteriors <-
  model.3 %>% 
  epred_draws(tibble(condition = c('expansive', 'constrictive')),
               re_formula = NA)
wrap_plots(
model.3.posteriors %>%
  mutate(model = 'model 3') %>%
  rbind(
    model.2.posteriors %>% 
      mutate(model = 'model 2'))  %>% 
  rbind(
    model.1.posteriors %>% 
      mutate(model = 'model 1')) %>% 
ggplot() + 
  geom_density(aes(x = .epred, fill = condition), adjust = 1.5, color = NA, alpha = .5) +
  facet_grid(model ~ .) +
  scale_x_continuous(limits = c(-150, 250), breaks = seq(-150, 250, by = 50)) +
  scale_color_theme() +
  theme_density_x +
  ggtitle('Compare the means of all three models'),
p1, nrow = 2, heights = c(4,1.5))

Model 4: Negative-Binomial Regression model

Understanding the data

pose_raw_df = read.csv("data/posture_data-raw.csv") %>%
  mutate(participant = factor(participant)) %>%
  rename(trial = trial.number)

head(pose_raw_df)
participant condition total.money trial trial.money exploded pumps life
1 expansive 910 0 62 0 62 72
1 expansive 910 1 0 1 27 27
1 expansive 910 2 47 0 47 98
1 expansive 910 3 0 1 86 86
1 expansive 910 4 60 0 60 104
1 expansive 910 5 0 1 26 26
get_prior(pumps ~ condition + trial + (1|condition), data = pose_raw_df)
prior                     class      coef                group      resp  dpar  nlpar  lb  ub  source
                          b                                                                    default
                          b          conditionexpansive                                        default
                          b          trial                                                     default
student_t(3, 34, 20.8)    Intercept                                                            default
student_t(3, 0, 20.8)     sd                                                           0       default
                          sd                             condition                             default
                          sd         Intercept           condition                             default
student_t(3, 0, 20.8)     sigma                                                        0       default
tibble(x = seq(1, 160, by = 1)) %>%
  ggplot(aes(x)) +
  geom_function(
    color = theme_blue,
    linetype = 'dashed',
    # the negative-binomial pmf is discrete; rounding x lets us draw it as a curve
    fun = function(x)
      dnbinom(round(x), size = 5, mu = 64),
    size = 1
  ) +
  coord_cartesian(xlim = c(0, 160)) +
  theme(
    axis.line.y = element_blank(),
    axis.text.y = element_blank(),
    axis.ticks.y = element_blank(),
    axis.title.y = element_blank(),
    axis.text.x = element_text(size = 16)
  )

pose_raw_df %>%
  mutate(c = as.factor(condition)) %>%
  ggplot(aes(x = pumps)) +
  geom_histogram(
    aes(y = ..density..),
    binwidth = 2,
    fill = theme_yellow,
    alpha = .5,
    color = 'white'
  ) +
  geom_density(size = 1,
               adjust = 3,
               color = theme_blue) +
  geom_function(
    color = "#222222",
    linetype = 'dashed',
    # the negative-binomial pmf is discrete; rounding x lets us draw it as a curve.
    # dnbinom is parameterized on the natural scale, but brm uses a log link,
    # so the Intercept prior in the model below is set on the log scale
    fun = function(x)
      dnbinom(round(x), size = 3, mu = 40),
    size = 1
  ) +
  theme(
    axis.line.y = element_blank(),
    axis.text.y = element_blank(),
    axis.ticks.y = element_blank(),
    axis.title.y = element_blank()
  )

Step 1: model specification


\[ \begin{align} y_{i} &\sim \mathrm{Neg-Binomial}(\mu, \phi) \\ log(\mu) &= \beta_{0} + \beta_{0,j} + \beta_{1} * x_i \\ \beta_{0} &\sim \mathrm{Normal}(3.5, 0.5) \\ \beta_{0, j} &\sim \mathrm{Student\_t}(3, 0, 1) \\ \beta_{1} &\sim \mathrm{Normal}(0, 0.5) \\ \phi &\sim \mathrm{Gamma}(3, 1) \\ i & \in \{\mathrm{expansive}, \mathrm{constrictive}\}\\ j & \in \{1, ..., \mathrm{N}\} \end{align} \]
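
For intuition about \(\phi\): under this parameterization (the one brms's negbinomial() uses), the variance grows quadratically in the mean, so a smaller \(\phi\) means more overdispersion:

\[ \mathrm{Var}(y) = \mu + \frac{\mu^2}{\phi} \]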

Step 2: prior predictive checks

model.4.prior_pred = brm(pumps ~ 1 + condition,
                      data = pose_raw_df, family = negbinomial(),
                      prior = c(prior(normal(4.1, 0.5), class = Intercept),
                                prior(normal(0, 0.5), class = b),
                                prior(gamma(5, 1), class = shape)
                      ),
                      backend = BRM_BACKEND,
                      file = 'rds/model.4.prior_predictive.rds',
                      file_refit = 'on_change' ,
                      sample_prior = "only",
                      iter = 4000, warmup = 1000, cores = 4, chains = 4)
model.4.prior_pred.samples <- model.4.prior_pred %>% 
    predicted_draws(tibble(condition = c('expansive', 'constrictive')), re_formula = NA)

head(model.4.prior_pred.samples)
condition .row .chain .iteration .draw .prediction
expansive 1 NA NA 1 22
expansive 1 NA NA 2 92
expansive 1 NA NA 3 131
expansive 1 NA NA 4 36
expansive 1 NA NA 5 29
expansive 1 NA NA 6 79
ggplot() +
geom_histogram(
  data = pose_raw_df,
  mapping = aes(x = pumps, y = ..density..),
  binwidth = 10,
  alpha = 0.5,
  fill = theme_yellow,
  color = 'white'
) +
geom_density(model.4.prior_pred.samples,
               mapping = aes(x = .prediction, y = ..density..), adjust = 1, color = theme_blue, size = 1) +
theme_density_x + 
coord_cartesian(xlim = c(0, 150)) +
theme(
  axis.text.x = element_text(size = 16)
)

Step 3: model fitting

model.4 = brm(pumps ~ 1 + condition,
                      data = pose_raw_df, family = negbinomial(),
                      prior = c(prior(normal(4.1, 0.5), class = Intercept),
                                prior(normal(0, 0.5), class = b),
                                prior(gamma(5, 1), class = shape)
                      ),
                      backend = BRM_BACKEND,
                      file = 'rds/model.4.rds',
                      file_refit = 'on_change' ,
                      iter = 4000, warmup = 1000, cores = 4, chains = 4)

summary(model.4)
##  Family: negbinomial 
##   Links: mu = log; shape = identity 
## Formula: pumps ~ 1 + condition 
##    Data: pose_raw_df (Number of observations: 2400) 
##   Draws: 4 chains, each with iter = 4000; warmup = 1000; thin = 1;
##          total post-warmup draws = 12000
## 
## Population-Level Effects: 
##                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept              3.64      0.01     3.61     3.66 1.00    11389     9056
## conditionexpansive    -0.01      0.02    -0.05     0.03 1.00    11512     8288
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## shape     4.59      0.15     4.32     4.88 1.00    11208     8380
## 
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
pose_raw_df.bayesian_poisson.y <- pose_raw_df$pumps

pose_raw_df.bayesian_poisson.yrep <- posterior_predict(model.4, ndraws = 30, seed = 1234)

ppc_dens_overlay(y = pose_raw_df.bayesian_poisson.y,
                 yrep = pose_raw_df.bayesian_poisson.yrep)
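
Since pumps is a count outcome, bayesplot's ppc_rootogram() can be a sharper check of the count distribution than a density overlay (a sketch, assuming your bayesplot version provides it):

ppc_rootogram(y = pose_raw_df.bayesian_poisson.y,
              yrep = pose_raw_df.bayesian_poisson.yrep)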

Step 4: model interpretation

The results of this model are on the log scale (the model uses a log link). What do the coefficients mean? The simplest way to interpret them is to transform the predictions back to the outcome scale and visualise the results:

p.model.4 = pose_raw_df %>%
  ggplot() +
  geom_point(aes(x = pumps, y = condition, colour = condition), 
             position = position_jitter(height = 0.1), alpha = 0.7) +
  scale_color_theme() + 
  labs(y = "Condition") +
  theme(
    legend.position = "none", 
    axis.line.y = element_blank(), 
    axis.ticks.y = element_blank(),
    axis.title.y = element_blank()
  ) +
  scale_x_continuous(limits = c(0, 130), breaks = seq(0, 150, by = 30))

draws.model.4 = plot_predictions(model.4, df = crossing(condition = c('expansive', 'constrictive'), 
                                          trial = 0:29,
                                          participant = 0)) + 
    scale_x_continuous(breaks = seq(0, 150, by = 30)) +
    coord_cartesian(xlim = c(0, 150))

wrap_plots(
  draws.model.4, 
  p.model.4,
nrow = 2)
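
Another option is to exponentiate the coefficients: because of the log link, exp() of a coefficient is a multiplicative effect on the expected number of pumps. A sketch using brms's fixef():

# multiplicative effects on the expected pump count, with 95% interval bounds
exp(fixef(model.4)[, c("Estimate", "Q2.5", "Q97.5")])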

Within each posterior draw, we compute the expected number of pumps for an average participant, averaged over the 30 trials.

model.4.posteriors <-  model.4 %>% 
    epred_draws(crossing(condition = c('expansive', 'constrictive'),     
                       trial = 0:29),
                re_formula = NA,
                allow_new_levels  = FALSE) %>% 
  mutate(model = 'model 4')  %>% 
  group_by(.draw, condition) %>% 
  summarise(.epred = mean(.epred))
model.4.posteriors %>%
  ungroup() %>%
  compare_levels(.epred, by = condition) %>%
  median_qi(.width = .95) %>%
  ggplot() + 
  geom_pointinterval(aes(x = .epred, y = condition, xmin = .lower, xmax = .upper)) +
  geom_vline(xintercept = 0, linetype = 2, color = "#979797") +
  scale_x_continuous(breaks = seq(-10, 10, by = 2)) +
  coord_cartesian(xlim = c(-10, 10)) +
  xlab("Difference in Mean") +
  theme(
    axis.text.x = element_text(size = 14),
    axis.text.y = element_text(size = 14),
    axis.title.x = element_text(size = 16),
    axis.title.y = element_blank()
  )

Plot the posteriors of the means.

model.4.posteriors %>% 
 ggplot(aes(x = .epred, fill = condition)) +
    stat_halfeye(.width = .95) +
    scale_color_theme() +
    facet_wrap(condition ~ ., ncol = 1) + 
    theme_density_x + 
    scale_x_continuous(limits = c(30, 50), breaks = seq(0, 60, by = 10)) +
    ggtitle('Posterior means of model.4')

From this, we can see that there does not appear to be a difference between the two conditions.

Reporting

Let’s use model 4.

model

We will need the ESS and Rhat values from this printout. If you want to report the standard deviation of random intercepts or CIs for other parameters, you can find them via summary() as well.

summary(model.4)
##  Family: negbinomial 
##   Links: mu = log; shape = identity 
## Formula: pumps ~ 1 + condition 
##    Data: pose_raw_df (Number of observations: 2400) 
##   Draws: 4 chains, each with iter = 4000; warmup = 1000; thin = 1;
##          total post-warmup draws = 12000
## 
## Population-Level Effects: 
##                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept              3.64      0.01     3.61     3.66 1.00    11389     9056
## conditionexpansive    -0.01      0.02    -0.05     0.03 1.00    11512     8288
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## shape     4.59      0.15     4.32     4.88 1.00    11208     8380
## 
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

Credible Intervals

We compute the credible intervals (CIs; the Bayesian analog of confidence intervals) for the two conditions.

model.4.posteriors.CI <- 
model.4.posteriors %>% 
  group_by(condition) %>% 
  median_qi(.epred, .width = .95)
model.4.posteriors.CI
condition       .epred   .lower   .upper .width .point .interval
constrictive  37.95021 36.90030 39.02023   0.95 median qi
expansive     37.68128 36.64807 38.74991   0.95 median qi
model.4.posteriors %>% 
ggplot() +
  geom_density(aes(x = .epred, fill = condition), alpha = .5, color = NA) +
  geom_point(model.4.posteriors.CI, 
             mapping = aes(x = .epred, y = 0), size = 3) + 
  geom_errorbarh(model.4.posteriors.CI, 
                 mapping = aes(x = .epred, xmin = .lower, xmax = .upper, y = 0), height = 0, linewidth = 1.5) + 
  facet_wrap(condition ~ ., ncol = 1) + 
  scale_x_continuous(limits = c(0, 80)) + 
  scale_y_continuous(expand = c(.02,.02)) + 
  scale_color_theme() +
  theme_density_x 

subtraction

model.4.posteriors_diff <- 
model.4.posteriors %>% 
  ungroup() %>% 
  compare_levels(variable = .epred, by = condition) %>% 
  ungroup()
head(model.4.posteriors_diff)
.draw condition .epred
1 expansive - constrictive 0.3618687
2 expansive - constrictive 1.1572392
3 expansive - constrictive -0.3027622
4 expansive - constrictive -0.2970297
5 expansive - constrictive -0.6349005
6 expansive - constrictive -0.9968948
model.4.posteriors_diff.CI <-
model.4.posteriors_diff %>% 
  median_qi(.epred)

model.4.posteriors_diff.CI
.epred .lower .upper .width .point .interval
-0.2747123 -1.74477 1.238197 0.95 median qi
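
We can also summarise this as the posterior probability that the difference is positive, i.e. the share of draws in which the expansive condition has the larger mean:

# proportion of posterior draws with expansive > constrictive
model.4.posteriors_diff %>% 
  summarise(p_positive = mean(.epred > 0))
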
model.4.posteriors_diff %>% 
ggplot() +
  geom_density(aes(x = .epred), alpha = .5,  fill = 'skyblue', color = NA, adjust = 2) +
  geom_point(model.4.posteriors_diff.CI, 
             mapping = aes(x = .epred, y = 0), size = 3) + 
  geom_errorbarh(model.4.posteriors_diff.CI, 
                 mapping = aes(x = .epred, xmin = .lower, xmax = .upper, y = 0), height = 0, linewidth = 1.5) + 
  xlab('expansive - constrictive') +
  ggtitle('Mean difference between expansive and constrictive') + 
  geom_vline(xintercept = 0, linetype = 2) + 
  scale_y_continuous(expand = c(.02,.02)) + 
  scale_x_continuous(limits = c(-10, 10), breaks = seq(-10, 10, by = 5)) + 
  scale_color_theme() +
  theme_density_x 

Session info

sessionInfo()
## R version 4.2.3 (2023-03-15)
## Platform: x86_64-apple-darwin17.0 (64-bit)
## Running under: macOS Big Sur ... 10.16
## 
## Matrix products: default
## BLAS:   /Library/Frameworks/R.framework/Versions/4.2/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/4.2/Resources/lib/libRlapack.dylib
## 
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
## 
## attached base packages:
## [1] stats     graphics  grDevices utils     datasets  methods   base     
## 
## other attached packages:
##  [1] cmdstanr_0.5.3      knitr_1.42          ggplot2_3.4.2      
##  [4] bayesplot_1.10.0    ggdist_3.2.1        tidybayes_3.0.4    
##  [7] brms_2.19.0         Rcpp_1.0.10         modelr_0.1.11      
## [10] broom.mixed_0.2.9.4 broom_1.0.4         patchwork_1.1.2    
## [13] gtools_3.9.4        forcats_1.0.0       tidyr_1.3.0        
## [16] purrr_1.0.1         tibble_3.2.1        dplyr_1.1.1        
## 
## loaded via a namespace (and not attached):
##   [1] colorspace_2.1-0     ellipsis_0.3.2       markdown_1.5        
##   [4] base64enc_0.1-3      rstudioapi_0.14      listenv_0.9.0       
##   [7] furrr_0.3.1          farver_2.1.1         rstan_2.21.8        
##  [10] bit64_4.0.5          svUnit_1.0.6         DT_0.27             
##  [13] fansi_1.0.4          mvtnorm_1.1-3        diffobj_0.3.5       
##  [16] bridgesampling_1.1-2 codetools_0.2-19     splines_4.2.3       
##  [19] cachem_1.0.7         shinythemes_1.2.0    jsonlite_1.8.4      
##  [22] shiny_1.7.4          readr_2.1.4          compiler_4.2.3      
##  [25] backports_1.4.1      Matrix_1.5-3         fastmap_1.1.1       
##  [28] cli_3.6.1            later_1.3.0          htmltools_0.5.4     
##  [31] prettyunits_1.1.1    tools_4.2.3          igraph_1.4.1        
##  [34] coda_0.19-4          gtable_0.3.3         glue_1.6.2          
##  [37] reshape2_1.4.4       posterior_1.4.1      jquerylib_0.1.4     
##  [40] vctrs_0.6.1          nlme_3.1-162         crosstalk_1.2.0     
##  [43] tensorA_0.36.2       xfun_0.38            stringr_1.5.0       
##  [46] globals_0.16.2       ps_1.7.4             mime_0.12           
##  [49] miniUI_0.1.1.1       lifecycle_1.0.3      future_1.32.0       
##  [52] zoo_1.8-11           scales_1.2.1         vroom_1.6.1         
##  [55] colourpicker_1.2.0   hms_1.1.3            promises_1.2.0.1    
##  [58] Brobdingnag_1.2-9    parallel_4.2.3       inline_0.3.19       
##  [61] shinystan_2.6.0      yaml_2.3.7           gridExtra_2.3       
##  [64] loo_2.6.0            StanHeaders_2.21.0-7 sass_0.4.5          
##  [67] stringi_1.7.12       highr_0.10           dygraphs_1.1.1.6    
##  [70] checkmate_2.1.0      pkgbuild_1.4.0       rlang_1.1.0         
##  [73] pkgconfig_2.0.3      matrixStats_0.63.0   distributional_0.3.2
##  [76] evaluate_0.20        lattice_0.20-45      labeling_0.4.2      
##  [79] rstantools_2.3.0     htmlwidgets_1.6.2    cowplot_1.1.1       
##  [82] bit_4.0.5            tidyselect_1.2.0     processx_3.8.0      
##  [85] parallelly_1.35.0    plyr_1.8.8           magrittr_2.0.3      
##  [88] R6_2.5.1             generics_0.1.3       DBI_1.1.3           
##  [91] pillar_1.9.0         withr_2.5.0          xts_0.13.0          
##  [94] abind_1.4-5          crayon_1.5.2         arrayhelpers_1.1-0  
##  [97] utf8_1.2.3           tzdb_0.3.0           rmarkdown_2.20      
## [100] grid_4.2.3           data.table_1.14.8    callr_3.7.3         
## [103] threejs_0.3.3        digest_0.6.31        xtable_1.8-4        
## [106] httpuv_1.6.9         RcppParallel_5.1.7   stats4_4.2.3        
## [109] munsell_0.5.0        bslib_0.4.2          shinyjs_2.1.0